static int l1, l2, oos_count, page_count;
#define FILE_AND_LINE 0
+//#define MFN_TO_WATCH 0x4700
#if FILE_AND_LINE
#define adjust(_p, _a) _adjust((_p), (_a), __FILE__, __LINE__)
{
int errors = 0;
int shadow_enabled = shadow_mode_enabled(d) ? 1 : 0;
+ int l2limit;
void _adjust(struct pfn_info *page, int adjtype ADJUST_EXTRA_ARGS)
{
+#ifdef MFN_TO_WATCH
+ if (page_to_pfn(page) == MFN_TO_WATCH)
+ {
+ APRINTK("adjust(mfn=%p, dir=%d, adjtype=%d) MFN_TO_WATCH",
+ page_to_pfn(page), dir, adjtype);
+ }
+#endif
if ( adjtype )
{
// adjust the type count
if ( count < 0 )
{
- APRINTK("Audit %d: general count went below zero pfn=%x t=%x ot=%x",
+ APRINTK("Audit %d: general count went below zero mfn=%x t=%x ot=%x",
d->id, page-frame_table,
page->u.inuse.type_info,
page->tlbflush_timestamp);
}
else if ( (count & ~PGT_count_mask) != 0 )
{
- APRINTK("Audit %d: general count overflowed pfn=%x t=%x ot=%x",
+ APRINTK("Audit %d: general count overflowed mfn=%x t=%x ot=%x",
d->id, page-frame_table,
page->u.inuse.type_info,
page->tlbflush_timestamp);
page->count_info += dir;
}
- void adjust_l2_page(unsigned long mfn, int adjtype)
+ void adjust_l2_page(unsigned long mfn)
{
unsigned long *pt = map_domain_mem(mfn << PAGE_SHIFT);
- int i, limit;
-
- if ( shadow_mode_external(d) )
- limit = L2_PAGETABLE_ENTRIES;
- else
- limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
+ int i;
- for ( i = 0; i < limit; i++ )
+ for ( i = 0; i < l2limit; i++ )
{
if ( pt[i] & _PAGE_PRESENT )
{
}
}
- adjust(l1page, adjtype);
+ adjust(l1page, !shadow_enabled);
+ }
+ }
+
+ if ( shadow_mode_translate(d) && !shadow_mode_external(d) )
+ {
+ unsigned long hl2mfn =
+ pt[l2_table_offset(LINEAR_PT_VIRT_START)] >> PAGE_SHIFT;
+ struct pfn_info *hl2page = pfn_to_page(hl2mfn);
+ adjust(hl2page, 0);
+ }
+
+ unmap_domain_mem(pt);
+ }
+
+ // Nested helper (GCC nested-function extension, matching the sibling
+ // adjust_l1_page/adjust_l2_page helpers): audit-adjust the reference
+ // counts of every guest frame mapped by the hl2 table at hl2mfn.
+ // Reads l2limit, d, noisy, lowmem_mappings and io_mappings from the
+ // enclosing audit function's scope.
+ void adjust_hl2_page(unsigned long hl2mfn)
+ {
+ unsigned long *pt = map_domain_mem(hl2mfn << PAGE_SHIFT);
+ int i;
+
+ for ( i = 0; i < l2limit; i++ )
+ {
+ if ( pt[i] & _PAGE_PRESENT )
+ {
+ unsigned long gmfn = pt[i] >> PAGE_SHIFT;
+ struct pfn_info *gpage = pfn_to_page(gmfn);
+
+ // Frames below 1MB (0x100 pages) are counted as low-memory
+ // mappings and otherwise ignored by the audit.
+ if ( gmfn < 0x100 )
+ {
+ lowmem_mappings++;
+ continue;
+ }
+
+ // Frames beyond max_page are I/O mappings; count and skip.
+ if ( gmfn > max_page )
+ {
+ io_mappings++;
+ continue;
+ }
+
+ if ( noisy )
+ {
+ // Pages owned by another domain are reported but not
+ // adjusted.
+ if ( page_get_owner(gpage) != d )
+ {
+ printk("Audit %d: [hl2mfn=%p,i=%x] Skip foreign page "
+ "dom=%p (id=%d) mfn=%p c=%08x t=%08x\n",
+ d->id, hl2mfn, i,
+ page_get_owner(gpage),
+ page_get_owner(gpage)->id,
+ gmfn,
+ gpage->count_info,
+ gpage->u.inuse.type_info);
+ continue;
+ }
+ }
+ // Adjust the general (non-type) reference count on this frame.
+ adjust(gpage, 0);
+ // NOTE(review): no unmap_domain_mem(pt) is visible in this hunk,
+ // unlike adjust_l2_page above -- confirm the mapping is released
+ // before the helper returns.
}
}
case PGT_snapshot:
break;
case PGT_l1_shadow:
- case PGT_hl2_shadow:
adjust_l1_page(smfn);
if ( page->u.inuse.type_info & PGT_pinned )
adjust(page, 0);
break;
+ case PGT_hl2_shadow:
+ adjust_hl2_page(smfn);
+ if ( page->u.inuse.type_info & PGT_pinned )
+ adjust(page, 0);
+ break;
case PGT_l2_shadow:
- adjust_l2_page(smfn, 0);
+ adjust_l2_page(smfn);
if ( page->u.inuse.type_info & PGT_pinned )
adjust(page, 0);
break;
if ( !(oos->writable_pl1e & (sizeof(l1_pgentry_t)-1)) )
adjust(pfn_to_page(oos->writable_pl1e >> PAGE_SHIFT), 0);
+ if ( oos->snapshot_mfn != SHADOW_SNAPSHOT_ELSEWHERE )
+ adjust(pfn_to_page(oos->snapshot_mfn), 0);
+
oos = oos->next;
oos_count++;
}
adjust(page, 1);
if ( page->u.inuse.type_info & PGT_validated )
- adjust_l2_page(mfn, 1);
+ adjust_l2_page(mfn);
break;
}
}
+ if ( shadow_mode_external(d) )
+ l2limit = L2_PAGETABLE_ENTRIES;
+ else
+ l2limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
+
adjust_for_pgtbase();
adjust_guest_pages();
#ifndef NDEBUG
-void _audit_domain(struct domain *d, int flags, const char *file, int line)
+void _audit_domain(struct domain *d, int flags)
{
void scan_for_pfn_in_mfn(struct domain *d, unsigned long xmfn,
unsigned long mfn)
struct pfn_info *page;
int errors = 0;
+ if ( (d != current->domain) && shadow_mode_translate(d) )
+ {
+ printk("skipping audit domain of translated domain %d "
+ "from other context\n",
+ d->id);
+ return;
+ }
+
if ( d != current->domain )
domain_pause(d);
synchronise_pagetables(~0UL);
page_type = a->gpfn_and_flags & PGT_type_mask;
switch ( page_type ) {
- case PGT_snapshot:
- // XXX -- what should we check here?
- break;
case PGT_l1_shadow:
case PGT_l2_shadow:
+ case PGT_hl2_shadow:
+ case PGT_snapshot:
if ( ((page->u.inuse.type_info & PGT_type_mask) != page_type ) ||
(page->count_info != 0) )
{
}
break;
- case PGT_hl2_shadow: // haven't thought about this case yet.
default:
BUG();
break;
spin_unlock(&d->page_alloc_lock);
if ( !(flags & AUDIT_QUIET) )
- printk("Audit dom%d (%s:%d) Done. "
+ printk("Audit dom%d Done. "
"pages=%d oos=%d l1=%d l2=%d ctot=%d ttot=%d\n",
- d->id, file, line, page_count, oos_count, l1, l2, ctot, ttot );
+ d->id, page_count, oos_count, l1, l2, ctot, ttot);
if ( !(flags & AUDIT_ALREADY_LOCKED) )
shadow_unlock(d);
max_type = PGT_l1_shadow;
}
FSH_LOG("shadow_promote gpfn=%p gmfn=%p nt=%p min=%p max=%p",
- gmfn, gmfn, new_type, min_type, max_type);
+ gpfn, gmfn, new_type, min_type, max_type);
if ( min_type <= max_type )
- shadow_remove_all_write_access(d, min_type, max_type, gpfn);
+ shadow_remove_all_write_access(d, min_type, max_type, gpfn, gmfn);
// To convert this page to use as a page table, the writable count
// should now be zero. Test this by grabbing the page as an page table,
break;
}
- set_shadow_status(d, gpfn, smfn, psh_type);
+ set_shadow_status(d, gpfn, gmfn, smfn, psh_type);
if ( pin )
shadow_pin(smfn);
// map the phys_to_machine map into the Read-Only MPT space for this domain
mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
- mk_l2_pgentry(pagetable_val(ed->arch.phys_table) | __PAGE_HYPERVISOR);
+ mk_l2_pgentry(pagetable_val(d->arch.phys_table) | __PAGE_HYPERVISOR);
ed->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
ed->arch.monitor_vtable = mpl2e;
ed->arch.monitor_vtable = 0;
}
+// Build the phys-to-machine (P2M) translation table for domain d.
+// Walks every page on d->page_list and installs an l1 entry mapping the
+// page's guest pfn to its machine frame, allocating l2/l1 frames from the
+// domain heap on demand. The new l2 root is stored in d->arch.phys_table.
+// Returns 1 on success, 0 on allocation failure.
+// NOTE(review): on l1page allocation failure this returns 0 with l2 still
+// mapped and the partially-built table still allocated -- presumably the
+// caller tears it down via free_p2m_table(); verify that error path.
+static int
+alloc_p2m_table(struct domain *d)
+{
+ struct list_head *list_ent;
+ struct pfn_info *page, *l2page, *l1page;
+ l2_pgentry_t *l2, l2e, last_l2e = mk_l2_pgentry(0);
+ l1_pgentry_t *l1 = NULL;
+ unsigned long va, mfn, pfn;
+
+ l2page = alloc_domheap_page(NULL);
+ if ( !l2page )
+ return 0;
+ // The freshly allocated l2 frame becomes the root of the P2M table.
+ d->arch.phys_table = mk_pagetable(page_to_pfn(l2page) << PAGE_SHIFT);
+ l2 = map_domain_mem(page_to_pfn(l2page) << PAGE_SHIFT);
+ memset(l2, 0, PAGE_SIZE);
+
+ list_ent = d->page_list.next;
+ while ( list_ent != &d->page_list )
+ {
+ page = list_entry(list_ent, struct pfn_info, list);
+ mfn = page_to_pfn(page);
+ // Every frame the domain owns must already have a valid M2P entry.
+ pfn = machine_to_phys_mapping[mfn];
+ ASSERT(pfn != INVALID_M2P_ENTRY);
+ ASSERT(pfn < (1u<<20));
+
+ va = pfn << PAGE_SHIFT;
+ if ( !l2_pgentry_val(l2e = l2[l2_table_offset(va)]) )
+ {
+ // First mapping in this l2 slot: allocate a fresh l1 frame.
+ l1page = alloc_domheap_page(NULL);
+ if ( !l1page )
+ return 0;
+ l2e = l2[l2_table_offset(va)] =
+ mk_l2_pgentry((page_to_pfn(l1page) << PAGE_SHIFT) |
+ __PAGE_HYPERVISOR);
+ }
+
+ // Cache the currently mapped l1 table and only remap when the l2
+ // entry changes -- page_list is not sorted by pfn, so consecutive
+ // pages often (but not always) share an l1.
+ if ( l2_pgentry_val(last_l2e) != l2_pgentry_val(l2e) )
+ {
+ if ( l1 )
+ unmap_domain_mem(l1);
+ l1 = map_domain_mem(l2_pgentry_val(l2e) & PAGE_MASK);
+ last_l2e = l2e;
+ }
+
+ l1[l1_table_offset(va)] = mk_l1_pgentry((mfn << PAGE_SHIFT) |
+ __PAGE_HYPERVISOR);
+ list_ent = page->list.next;
+ }
+
+ if ( l1 )
+ unmap_domain_mem(l1);
+ unmap_domain_mem(l2);
+
+ return 1;
+}
+
+// Tear down the P2M table built by alloc_p2m_table (freeing the l1/l2
+// frames it allocated from the domain heap). Not implemented yet, so
+// trap loudly if anyone reaches this path.
+static void
+free_p2m_table(struct domain *d)
+{
+ BUG();
+}
+
int __shadow_mode_enable(struct domain *d, unsigned int mode)
{
struct exec_domain *ed;
+ int new_modes = (mode & ~d->arch.shadow_mode);
+
+ // Gotta be adding something to call this function.
+ ASSERT(new_modes);
+
+ // can't take anything away by calling this function.
+ ASSERT(!(d->arch.shadow_mode & ~mode));
for_each_exec_domain(d, ed)
{
}
}
- if ( !d->arch.shadow_ht )
+ if ( new_modes & SHM_enable )
{
+ ASSERT( !d->arch.shadow_ht );
d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
if ( d->arch.shadow_ht == NULL )
goto nomem;
shadow_ht_buckets * sizeof(struct shadow_status));
}
- if ( shadow_mode_log_dirty(d) && !d->arch.shadow_dirty_bitmap )
+ if ( new_modes & SHM_log_dirty )
{
+ ASSERT( !d->arch.shadow_dirty_bitmap );
d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
d->arch.shadow_dirty_bitmap =
xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size /
d->arch.shadow_dirty_bitmap_size/8);
}
+ if ( new_modes & SHM_translate )
+ {
+ if ( !(new_modes & SHM_external) )
+ {
+ ASSERT( !pagetable_val(d->arch.phys_table) );
+ if ( !alloc_p2m_table(d) )
+ {
+ printk("alloc_p2m_table failed (out-of-memory?)\n");
+ goto nomem;
+ }
+ }
+ else
+ {
+ // external guests provide their own memory for their P2M maps.
+ //
+ unsigned long mfn = pagetable_val(d->arch.phys_table)>>PAGE_SHIFT;
+ ASSERT( d == page_get_owner(&frame_table[mfn]) );
+ }
+ }
+
printk("audit1\n");
- _audit_domain(d, AUDIT_ALREADY_LOCKED | AUDIT_ERRORS_OK, __FILE__, __LINE__);
+ _audit_domain(d, AUDIT_ALREADY_LOCKED | AUDIT_ERRORS_OK);
printk("audit1 done\n");
// Get rid of any shadow pages from any previous shadow mode.
free_shadow_pages(d);
printk("audit2\n");
- _audit_domain(d, AUDIT_ALREADY_LOCKED | AUDIT_ERRORS_OK, __FILE__, __LINE__);
+ _audit_domain(d, AUDIT_ALREADY_LOCKED | AUDIT_ERRORS_OK);
printk("audit2 done\n");
// Turn off writable page tables.
// It doesn't mix with shadow mode.
+ // And shadow mode offers a superset of functionality.
//
vm_assist(d, VMASST_CMD_disable, VMASST_TYPE_writable_pagetables);
audit_adjust_pgtables(d, 1, 1);
printk("audit3\n");
- _audit_domain(d, AUDIT_ALREADY_LOCKED, __FILE__, __LINE__);
+ _audit_domain(d, AUDIT_ALREADY_LOCKED);
printk("audit3 done\n");
return 0;
nomem:
- if ( d->arch.shadow_ht != NULL )
+ if ( (new_modes & SHM_enable) && (d->arch.shadow_ht != NULL) )
+ {
xfree(d->arch.shadow_ht);
- d->arch.shadow_ht = NULL;
+ d->arch.shadow_ht = NULL;
+ }
+ if ( (new_modes & SHM_log_dirty) && (d->arch.shadow_dirty_bitmap != NULL) )
+ {
+ xfree(d->arch.shadow_dirty_bitmap);
+ d->arch.shadow_dirty_bitmap = NULL;
+ }
+ if ( (new_modes & SHM_translate) && !(new_modes & SHM_external) &&
+ pagetable_val(d->arch.phys_table) )
+ {
+ free_p2m_table(d);
+ }
return -ENOMEM;
}
return rc;
}
+// Rewrite an L1 page table in place, converting each present PTE's frame
+// number from machine (mfn) to guest-physical (gpfn) -- used when moving
+// a domain's existing pagetables into translated shadow mode.
+// p2m is the domain's phys-to-machine table, used only to sanity-check
+// that the computed gpfn maps straight back to the same mfn.
+static void
+translate_l1pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l1mfn)
+{
+ int i;
+ l1_pgentry_t *l1;
+
+ l1 = map_domain_mem(l1mfn << PAGE_SHIFT);
+ for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
+ {
+ if ( is_guest_l1_slot(i) &&
+ (l1_pgentry_val(l1[i]) & _PAGE_PRESENT) )
+ {
+ unsigned long mfn = l1_pgentry_val(l1[i]) >> PAGE_SHIFT;
+ unsigned long gpfn = __mfn_to_gpfn(d, mfn);
+ ASSERT((l1_pgentry_val(p2m[gpfn]) >> PAGE_SHIFT) == mfn);
+ // Swap in the guest frame number, preserving the low flag bits.
+ l1[i] = mk_l1_pgentry((gpfn << PAGE_SHIFT) |
+ (l1_pgentry_val(l1[i]) & ~PAGE_MASK));
+ }
+ }
+ unmap_domain_mem(l1);
+}
+
+// This is not general enough to handle arbitrary pagetables
+// with shared L1 pages, etc., but it is sufficient for bringing
+// up dom0.
+//
+// Rewrite an L2 page table -- and, via translate_l1pgtable, every L1 it
+// references -- from machine frame numbers to guest-physical ones.
+// Precondition: the domain is in translate (but not external) shadow mode.
+// NOTE(review): an L1 shared by two L2 slots would be translated twice,
+// corrupting it -- hence the dom0-only caveat above.
+void
+translate_l2pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn)
+{
+ int i;
+ l2_pgentry_t *l2;
+
+ ASSERT(shadow_mode_translate(d) && !shadow_mode_external(d));
+
+ l2 = map_domain_mem(l2mfn << PAGE_SHIFT);
+ for (i = 0; i < L2_PAGETABLE_ENTRIES; i++)
+ {
+ if ( is_guest_l2_slot(i) &&
+ (l2_pgentry_val(l2[i]) & _PAGE_PRESENT) )
+ {
+ unsigned long mfn = l2_pgentry_val(l2[i]) >> PAGE_SHIFT;
+ unsigned long gpfn = __mfn_to_gpfn(d, mfn);
+ ASSERT((l1_pgentry_val(p2m[gpfn]) >> PAGE_SHIFT) == mfn);
+ l2[i] = mk_l2_pgentry((gpfn << PAGE_SHIFT) |
+ (l2_pgentry_val(l2[i]) & ~PAGE_MASK));
+ // Recurse using the *machine* frame captured before the slot
+ // above was rewritten to hold the gpfn.
+ translate_l1pgtable(d, p2m, mfn);
+ }
+ }
+ unmap_domain_mem(l2);
+}
+
static void free_shadow_ht_entries(struct domain *d)
{
struct shadow_status *x, *n;
shadow_unlock(d);
}
+// Translate a guest pfn to a machine frame by walking the domain's P2M
+// table directly with map_domain_mem -- usable even when this domain's
+// page tables are not the ones currently installed (unlike __gpfn_to_mfn).
+// Returns 0 when the gpfn has no present l2 or l1 mapping; 0 doubles as
+// the failure sentinel.
+static unsigned long
+gpfn_to_mfn_safe(struct domain *d, unsigned long gpfn)
+{
+ ASSERT( shadow_mode_translate(d) );
+
+ perfc_incrc(gpfn_to_mfn_safe);
+
+ unsigned long va = gpfn << PAGE_SHIFT;
+ unsigned long phystab = pagetable_val(d->arch.phys_table);
+ l2_pgentry_t *l2 = map_domain_mem(phystab);
+ l2_pgentry_t l2e = l2[l2_table_offset(va)];
+ unmap_domain_mem(l2);
+ if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
+ {
+ printk("gpfn_to_mfn_safe(d->id=%d, gpfn=%p) => 0 l2e=%p\n",
+ d->id, gpfn, l2_pgentry_val(l2e));
+ return 0;
+ }
+ unsigned long l1tab = l2_pgentry_val(l2e) & PAGE_MASK;
+ l1_pgentry_t *l1 = map_domain_mem(l1tab);
+ l1_pgentry_t l1e = l1[l1_table_offset(va)];
+ unmap_domain_mem(l1);
+
+ // NOTE(review): unconditional trace printk on every successful lookup;
+ // this path is expected to be rare (shadow setup/mode changes) but the
+ // logging will be noisy if that assumption ever breaks.
+ printk("gpfn_to_mfn_safe(d->id=%d, gpfn=%p) => %p phystab=%p l2e=%p l1tab=%p, l1e=%p\n",
+ d->id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
+
+ if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) )
+ {
+ printk("gpfn_to_mfn_safe(d->id=%d, gpfn=%p) => 0 l1e=%p\n",
+ d->id, gpfn, l1_pgentry_val(l1e));
+ return 0;
+ }
+
+ return l1_pgentry_val(l1e) >> PAGE_SHIFT;
+}
+
static unsigned long
shadow_hl2_table(struct domain *d, unsigned long gpfn, unsigned long gmfn,
unsigned long smfn)
perfc_incrc(shadow_hl2_table_count);
- ASSERT( pagetable_val(current->arch.guest_table) == (gmfn << PAGE_SHIFT) );
- gl2 = current->arch.guest_vtable;
-
+ gl2 = map_domain_mem(gmfn << PAGE_SHIFT);
hl2 = map_domain_mem(hl2mfn << PAGE_SHIFT);
if ( shadow_mode_external(d) )
else
limit = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
- for ( i = 0; i < limit; i++ )
+ if ( unlikely(current->domain != d) && !shadow_mode_external(d) )
{
- unsigned long gl2e = l2_pgentry_val(gl2[i]);
- unsigned long mfn;
+ // Can't use __gpfn_to_mfn() if we don't have one of this domain's
+ // page tables currently installed. What a pain in the neck!
+ //
+ // This isn't common -- it only happens during shadow mode setup
+ // and mode changes.
+ //
+ perfc_incrc(shadow_hl2_other_domain);
+ for ( i = 0; i < limit; i++ )
+ {
+ unsigned long gl2e = l2_pgentry_val(gl2[i]);
+ unsigned long mfn;
- if ( gl2e & _PAGE_PRESENT )
+ if ( (gl2e & _PAGE_PRESENT) &&
+ (mfn = gpfn_to_mfn_safe(d, gl2e >> PAGE_SHIFT)) )
+ {
+ hl2[i] = mk_l1_pgentry((mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ get_page(pfn_to_page(mfn), d);
+ }
+ else
+ {
+ hl2[i] = mk_l1_pgentry(0);
+ }
+ }
+ }
+ else
+ {
+ for ( i = 0; i < limit; i++ )
{
- mfn = __gpfn_to_mfn(d, gl2e >> PAGE_SHIFT);
- hl2[i] = mk_l1_pgentry((mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
- get_page(pfn_to_page(mfn), d);
+ unsigned long gl2e = l2_pgentry_val(gl2[i]);
+ unsigned long mfn;
+
+ if ( (gl2e & _PAGE_PRESENT) &&
+ (mfn = __gpfn_to_mfn(d, gl2e >> PAGE_SHIFT)) )
+ {
+ hl2[i] = mk_l1_pgentry((mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ get_page(pfn_to_page(mfn), d);
+ }
+ else
+ hl2[i] = mk_l1_pgentry(0);
}
- else
- hl2[i] = mk_l1_pgentry(0);
}
if ( !shadow_mode_external(d) )
}
unmap_domain_mem(hl2);
+ unmap_domain_mem(gl2);
return hl2mfn;
}
&idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
+ spl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
+ mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+
+ spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
+ mk_l2_pgentry(__pa(page_get_owner(
+ &frame_table[gmfn])->arch.mm_perdomain_pt) |
+ __PAGE_HYPERVISOR);
+
if ( shadow_mode_translate(d) ) // NB: not external
{
unsigned long hl2mfn;
- if ( unlikely(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow)) )
+
+ spl2e[l2_table_offset(RO_MPT_VIRT_START)] =
+ mk_l2_pgentry(pagetable_val(d->arch.phys_table) |
+ __PAGE_HYPERVISOR);
+
+ if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
// shadow_mode_translate (but not external) sl2 tables hold a
else
spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
mk_l2_pgentry((gmfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-
- spl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
- mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
-
- spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
- mk_l2_pgentry(__pa(page_get_owner(
- &frame_table[gmfn])->arch.mm_perdomain_pt) |
- __PAGE_HYPERVISOR);
}
else
{
return f;
}
-static unsigned long
+static inline unsigned long
shadow_make_snapshot(
struct domain *d, unsigned long gpfn, unsigned long gmfn)
{
}
u32 shadow_remove_all_write_access(
- struct domain *d, unsigned min_type, unsigned max_type, unsigned long gpfn)
+ struct domain *d, unsigned min_type, unsigned max_type,
+ unsigned long gpfn, unsigned long gmfn)
{
int i;
struct shadow_status *a;
- unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
unsigned long sl1mfn = __shadow_status(d, gpfn, PGT_l1_shadow);
u32 count = 0;
ASSERT( shadow_mode_translate(d) );
+ BUG(); // ref counts for hl2mfn and smfn need to be maintained!
+
mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
#define FAIL(_f, _a...) \
do { \
- printk("XXX %s-FAIL (%d,%d)" _f "\n" \
+ printk("XXX %s-FAIL (%d,%d,%d)" _f "\n" \
"g=%08lx s=%08lx &g=%08lx &s=%08lx" \
" v2m(&g)=%08lx v2m(&s)=%08lx ea=%08lx\n", \
- sh_check_name, level, l1_idx, ## _a , \
+ sh_check_name, level, l2_idx, l1_idx, ## _a , \
gpte, spte, pgpte, pspte, \
v2m(pgpte), v2m(pspte), \
(l2_idx << L2_PAGETABLE_SHIFT) | \
if ( (spte & mask) != (gpte & mask) )
FAIL("Corrupt?");
- if ( (spte & _PAGE_DIRTY ) && !(gpte & _PAGE_DIRTY) && !oos_ptes )
+ if ( (level == 1) &&
+ (spte & _PAGE_DIRTY ) && !(gpte & _PAGE_DIRTY) && !oos_ptes )
FAIL("Dirty coherence");
if ( (spte & _PAGE_ACCESSED ) && !(gpte & _PAGE_ACCESSED) && !oos_ptes )
if ( (spte & _PAGE_RW ) && !(gpte & _PAGE_RW) && !oos_ptes )
{
- printk("gpfn=%p gmfn=%p smfn=%p t=0x%08x page_table_page=%d oos_ptes=%d\n",
+ printk("gpfn=%p gmfn=%p smfn=%p t=0x%08x page_table_page=%d "
+ "oos_ptes=%d\n",
gpfn, gmfn, smfn,
frame_table[gmfn].u.inuse.type_info,
page_table_page, oos_ptes);
FAIL("RW coherence");
}
- if ( (spte & _PAGE_RW ) && !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY)) && !oos_ptes )
+ if ( (level == 1) &&
+ (spte & _PAGE_RW ) &&
+ !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY)) &&
+ !oos_ptes )
{
- printk("gpfn=%p gmfn=%p smfn=%p t=0x%08x page_table_page=%d oos_ptes=%d\n",
+ printk("gpfn=%p gmfn=%p smfn=%p t=0x%08x page_table_page=%d "
+ "oos_ptes=%d\n",
gpfn, gmfn, smfn,
frame_table[gmfn].u.inuse.type_info,
page_table_page, oos_ptes);
FAIL("RW2 coherence");
}
- if ( gpfn == smfn )
+ if ( gmfn == smfn )
{
if ( level > 1 )
FAIL("Linear map ???"); /* XXX this will fail on BSD */
sh_l2_present = sh_l1_present = 0;
perfc_incrc(check_pagetable);
- ptbase_pfn = gptbase >> PAGE_SHIFT;
- ptbase_mfn = __gpfn_to_mfn(d, ptbase_pfn);
+ ptbase_mfn = gptbase >> PAGE_SHIFT;
+ ptbase_pfn = __mfn_to_gpfn(d, ptbase_mfn);
if ( !(smfn = __shadow_status(d, ptbase_pfn, PGT_base_page_table)) )
{